More x86_64 work: merge processor.h across x86_32 and x86_64, split current.h, ptrace.h and uaccess.h into per-subarch headers, use the __OS/__OP size macros for shared control-register asm, and drop 32-bit-only cruft (Cyrix registers, B-stepping workaround, i387 union, pagetable quicklists).
3ddb79c3KhTI0F_Iw_hRL9QEyOVK-g xen/include/asm-x86/cache.h
404f1b920OQVnrbnXnySS-WxrH9Wzw xen/include/asm-x86/config.h
3ddb79c2LLt11EQHjrd6sB7FUqvFfA xen/include/asm-x86/cpufeature.h
-3ddb79c2ADvRmdexd9y3AYK9_NTx-Q xen/include/asm-x86/current.h
+40cf1596ajIU1KJfF22XD-tSLfH6XA xen/include/asm-x86/current.h
3ddb79c2jFkPAZTDmU35L6IUssYMgQ xen/include/asm-x86/debugreg.h
3ddb79c3r9-31dIsewPV3P3i8HALsQ xen/include/asm-x86/delay.h
3ddb79c34BFiXjBJ_cCKB0aCsV1IDw xen/include/asm-x86/desc.h
404f1bb41Yl-5ZjIWnG66HDCj6OIWA xen/include/asm-x86/pda.h
4022a73diKn2Ax4-R4gzk59lm1YdDg xen/include/asm-x86/pdb.h
3ddb79c2QF5-pZGzuX4QukPCDAl59A xen/include/asm-x86/processor.h
-3ddb79c3mbqEM7QQr3zVq7NiBNhouA xen/include/asm-x86/ptrace.h
+40cf1596bim9F9DNdV75klgRSZ6Y2A xen/include/asm-x86/ptrace.h
3ddb79c2plf7ciNgoNjU-RsbUzawsw xen/include/asm-x86/rwlock.h
3ddb79c3Hgbb2g8CyWLMCK-6_ZVQSQ xen/include/asm-x86/smp.h
3ddb79c3jn8ALV_S9W5aeTYUQRKBpg xen/include/asm-x86/smpboot.h
3e397e66xPNc8eaSqC9pPbyAtRGzHA xen/include/asm-x86/time.h
3e450943TfE-iovQIY_tMO_VdGsPhA xen/include/asm-x86/timex.h
3ddb79c4HugMq7IYGxcQKFBpKwKhzA xen/include/asm-x86/types.h
-3ddb79c3M2n1ROZH6xk3HbyN4CPDqg xen/include/asm-x86/uaccess.h
+40cf1596saFaHD5DC5zvrSn7CDCWGQ xen/include/asm-x86/uaccess.h
3ddb79c3uPGcP_l_2xyGgBSWd5aC-Q xen/include/asm-x86/unaligned.h
+3ddb79c2ADvRmdexd9y3AYK9_NTx-Q xen/include/asm-x86/x86_32/current.h
+3ddb79c3mbqEM7QQr3zVq7NiBNhouA xen/include/asm-x86/x86_32/ptrace.h
+3ddb79c3M2n1ROZH6xk3HbyN4CPDqg xen/include/asm-x86/x86_32/uaccess.h
404f1b9ceJeGVaPNIENm2FkK0AgEOQ xen/include/asm-x86/x86_64/current.h
404f1b9fl6AQ_a-T1TDK3fuwTPXmHw xen/include/asm-x86/x86_64/desc.h
404f1badfXZJZ2sU8sh9PS2EZvd19Q xen/include/asm-x86/x86_64/ldt.h
404f1bb1LSCqrMDSfRAti5NdMQPJBQ xen/include/asm-x86/x86_64/page.h
-404f1bb756fZfxk5HDx7J7BW3R-1jQ xen/include/asm-x86/x86_64/processor.h
404f1bb86rAXB3aLS1vYdcqpJiEcyg xen/include/asm-x86/x86_64/ptrace.h
404f1bc4tWkB9Qr8RkKtZGW5eMQzhw xen/include/asm-x86/x86_64/uaccess.h
400304fcmRQmDdFYEzDh0wcBba9alg xen/include/hypervisor-ifs/COPYING
{
if ( cpu_has_fxsr ) {
asm volatile( "fxsave %0 ; fnclex"
- : "=m" (tsk->thread.i387.fxsave) );
+ : "=m" (tsk->thread.i387) );
} else {
asm volatile( "fnsave %0 ; fwait"
- : "=m" (tsk->thread.i387.fsave) );
+ : "=m" (tsk->thread.i387) );
}
clear_bit(PF_USEDFPU, &tsk->flags);
}
{
if ( cpu_has_fxsr ) {
asm volatile( "fxrstor %0"
- : : "m" (tsk->thread.i387.fxsave) );
+ : : "m" (tsk->thread.i387) );
} else {
asm volatile( "frstor %0"
- : : "m" (tsk->thread.i387.fsave) );
+ : : "m" (tsk->thread.i387) );
}
}
#include <asm/pdb.h>
char ignore_irq13; /* set if exception 16 works */
-struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
+struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1 };
/* Lots of nice things, since we only target PPro+. */
unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE;
#ifdef CONFIG_SMP
-/* Set if we find a B stepping CPU */
-static int smp_b_stepping;
-
/* Setup configured maximum number of CPUs to activate */
static int max_cpus = -1;
void __init smp_store_cpu_info(int id)
{
- struct cpuinfo_x86 *c = cpu_data + id;
-
- *c = boot_cpu_data;
- c->pte_quick = 0;
- c->pmd_quick = 0;
- c->pgd_quick = 0;
- c->pgtable_cache_sz = 0;
- identify_cpu(c);
- /*
- * Mask B, Pentium, but not Pentium MMX
- */
- if (c->x86_vendor == X86_VENDOR_INTEL &&
- c->x86 == 5 &&
- c->x86_mask >= 1 && c->x86_mask <= 4 &&
- c->x86_model <= 3)
- /*
- * Remember we have B step Pentia with bugs
- */
- smp_b_stepping = 1;
+ cpu_data[id] = boot_cpu_data;
+ identify_cpu(&cpu_data[id]);
}
/*
}
smp_num_cpus = cpucount + 1;
- if (smp_b_stepping)
- printk("WARNING: SMP operation may"
- " be unreliable with B stepping processors.\n");
Dprintk("Boot done.\n");
/*
/* Find information saved during fault and dump it to the console. */
tss = &init_tss[cpu];
- printk("CPU: %d\nEIP: %04x:[<%08lx>] \nEFLAGS: %08lx\n",
+ printk("CPU: %d\nEIP: %04x:[<%08x>] \nEFLAGS: %08x\n",
cpu, tss->cs, tss->eip, tss->eflags);
- printk("CR3: %08lx\n", tss->__cr3);
- printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
+ printk("CR3: %08x\n", tss->__cr3);
+ printk("eax: %08x ebx: %08x ecx: %08x edx: %08x\n",
tss->eax, tss->ebx, tss->ecx, tss->edx);
- printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
+ printk("esi: %08x edi: %08x ebp: %08x esp: %08x\n",
tss->esi, tss->edi, tss->ebp, tss->esp);
printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
tss->ds, tss->es, tss->fs, tss->gs, tss->ss);
#include <xen/shadow.h>
#include <hypervisor-ifs/sched_ctl.h>
-
#define TRC_DOM0OP_ENTER_BASE 0x00020000
#define TRC_DOM0OP_LEAVE_BASE 0x00030000
-
extern unsigned int alloc_new_dom_mem(struct task_struct *, unsigned int);
static int msr_cpu_mask;
rdmsr(msr_addr, msr_lo, msr_hi);
}
-
long do_dom0_op(dom0_op_t *u_dom0_op)
{
long ret = 0;
This costs 4MB -- may want to fix some day */
/* Pin the ownership of the MP table so that DOM0 can map it later. */
- for ( mfn = virt_to_phys((void *)RDWR_MPT_VIRT_START)>>PAGE_SHIFT;
- mfn < virt_to_phys((void *)RDWR_MPT_VIRT_END)>>PAGE_SHIFT;
+ for ( mfn = virt_to_phys(&machine_to_phys_mapping[0])>>PAGE_SHIFT;
+ mfn < virt_to_phys(&machine_to_phys_mapping[1024*1024])>>PAGE_SHIFT;
mfn++ )
{
frame_table[mfn].count_and_flags = 1 | PGC_allocated;
#if defined(__x86_64__)
#define PML4_ENTRY_BITS 39
-#define PML4_ENTRY_BYTES (1<<PML4_ENTRY_BITS)
+#define PML4_ENTRY_BYTES (1UL<<PML4_ENTRY_BITS)
/*
* Memory layout:
/* Next 4MB of virtual address space used for per-domain mappings (eg. GDT). */
#define PERDOMAIN_VIRT_START (SH_LINEAR_PT_VIRT_END)
#define PERDOMAIN_VIRT_END (PERDOMAIN_VIRT_START + (4*1024*1024))
-#define GDT_VIRT_START (PERDOMAIN_VIRT_START)
-#define GDT_VIRT_END (GDT_VIRT_START + (64*1024))
-#define LDT_VIRT_START (GDT_VIRT_END)
-#define LDT_VIRT_END (LDT_VIRT_START + (64*1024))
/* Penultimate 4MB of virtual address space used for domain page mappings. */
#define MAPCACHE_VIRT_START (PERDOMAIN_VIRT_END)
#define MAPCACHE_VIRT_END (MAPCACHE_VIRT_START + (4*1024*1024))
#endif /* __i386__ */
+#define GDT_VIRT_START (PERDOMAIN_VIRT_START)
+#define GDT_VIRT_END (GDT_VIRT_START + (64*1024))
+#define LDT_VIRT_START (GDT_VIRT_END)
+#define LDT_VIRT_END (LDT_VIRT_START + (64*1024))
+
#endif /* __XEN_I386_CONFIG_H__ */
-#ifndef _X86_CURRENT_H
-#define _X86_CURRENT_H
-struct task_struct;
-
-#define STACK_RESERVED \
- (sizeof(execution_context_t) + sizeof(struct task_struct *))
-
-static inline struct task_struct * get_current(void)
-{
- struct task_struct *current;
- __asm__ ( "orl %%esp,%0; andl $~3,%0; movl (%0),%0"
- : "=r" (current) : "0" (STACK_SIZE-4) );
- return current;
-}
-
-#define current get_current()
-
-static inline void set_current(struct task_struct *p)
-{
- __asm__ ( "orl %%esp,%0; andl $~3,%0; movl %1,(%0)"
- : : "r" (STACK_SIZE-4), "r" (p) );
-}
-
-static inline execution_context_t *get_execution_context(void)
-{
- execution_context_t *execution_context;
- __asm__ ( "andl %%esp,%0; addl %2,%0"
- : "=r" (execution_context)
- : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) );
- return execution_context;
-}
-
-static inline unsigned long get_stack_top(void)
-{
- unsigned long p;
- __asm__ ( "orl %%esp,%0; andl $~3,%0"
- : "=r" (p) : "0" (STACK_SIZE-4) );
- return p;
-}
-
-#define schedule_tail(_p) \
- __asm__ __volatile__ ( \
- "andl %%esp,%0; addl %2,%0; movl %0,%%esp; jmp *%1" \
- : : "r" (~(STACK_SIZE-1)), \
- "r" (unlikely(is_idle_task((_p))) ? \
- continue_cpu_idle_loop : \
- continue_nonidle_task), \
- "i" (STACK_SIZE-STACK_RESERVED) )
-
-
-#endif /* _X86_CURRENT_H */
+#ifdef __x86_64__
+#include <asm/x86_64/current.h>
+#else
+#include <asm/x86_32/current.h>
+#endif
struct task_struct *pcurrent; /* Current process */
int irqcount; /* Irq nesting counter. Starts with -1 */
int cpunumber; /* Logical CPU number */
- /* XXX: could be a single list */
- unsigned long *pgd_quick;
- unsigned long *pmd_quick;
- unsigned long *pte_quick;
- unsigned long pgtable_cache_sz;
char *irqstackptr; /* top of irqstack */
unsigned long volatile *level4_pgt;
} ____cacheline_aligned;
/*
- * include/asm-i386/processor.h
+ * include/asm-x86/processor.h
*
* Copyright (C) 1994 Linus Torvalds
*/
-#ifndef __ASM_I386_PROCESSOR_H
-#define __ASM_I386_PROCESSOR_H
+#ifndef __ASM_X86_PROCESSOR_H
+#define __ASM_X86_PROCESSOR_H
#include <asm/page.h>
#include <asm/types.h>
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
+#ifdef __x86_64__
+#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
+#else
#define current_text_addr() \
({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
+#endif
/*
* CPU type and hardware bug flags. Kept separately for each CPU.
__u8 x86_vendor; /* CPU vendor */
__u8 x86_model;
__u8 x86_mask;
- char wp_works_ok; /* It doesn't on 386's */
- char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
- char hard_math;
- char rfu;
int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
__u32 x86_capability[NCAPINTS];
char x86_vendor_id[16];
- char x86_model_id[64];
- int x86_cache_size; /* in KB - valid for CPUS which support this
- call */
- int fdiv_bug;
- int f00f_bug;
- int coma_bug;
- unsigned long loops_per_jiffy;
- unsigned long *pgd_quick;
- unsigned long *pmd_quick;
- unsigned long *pte_quick;
- unsigned long pgtable_cache_sz;
+    int  x86_cache_size;  /* in KB - for CPUs that support this call */
+ int x86_clflush_size;
+ int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined */
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL 0
#define X86_CR0_PG 0x80000000 /* Paging (RW) */
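
/*
 * NB. The __OS ("operation suffix") and __OP ("operand prefix") string macros
 * used below are assumed to expand to "l"/"e" on x86_32 and "q"/"r" on
 * x86_64, so the same control-register accessors assemble on both subarchs.
 */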
#define read_cr0() ({ \
- unsigned int __dummy; \
+ unsigned long __dummy; \
__asm__( \
- "movl %%cr0,%0\n\t" \
+ "mov"__OS" %%cr0,%0\n\t" \
:"=r" (__dummy)); \
__dummy; \
})
#define write_cr0(x) \
- __asm__("movl %0,%%cr0": :"r" (x));
+ __asm__("mov"__OS" %0,%%cr0": :"r" (x));
/*
static inline void set_in_cr4 (unsigned long mask)
{
mmu_cr4_features |= mask;
- __asm__("movl %%cr4,%%eax\n\t"
- "orl %0,%%eax\n\t"
- "movl %%eax,%%cr4\n"
+ __asm__("mov"__OS" %%cr4,%%"__OP"ax\n\t"
+ "or"__OS" %0,%%"__OP"ax\n\t"
+ "mov"__OS" %%"__OP"ax,%%cr4\n"
: : "irg" (mask)
:"ax");
}
static inline void clear_in_cr4 (unsigned long mask)
{
mmu_cr4_features &= ~mask;
- __asm__("movl %%cr4,%%eax\n\t"
- "andl %0,%%eax\n\t"
- "movl %%eax,%%cr4\n"
+ __asm__("mov"__OS" %%cr4,%%"__OP"ax\n\t"
+ "and"__OS" %0,%%"__OP"ax\n\t"
+ "movl"__OS" %%"__OP"ax,%%cr4\n"
: : "irg" (~mask)
:"ax");
}
-
-
-/*
- * Cyrix CPU configuration register indexes
- */
-#define CX86_CCR0 0xc0
-#define CX86_CCR1 0xc1
-#define CX86_CCR2 0xc2
-#define CX86_CCR3 0xc3
-#define CX86_CCR4 0xe8
-#define CX86_CCR5 0xe9
-#define CX86_CCR6 0xea
-#define CX86_CCR7 0xeb
-#define CX86_DIR0 0xfe
-#define CX86_DIR1 0xff
-#define CX86_ARR_BASE 0xc4
-#define CX86_RCR_BASE 0xdc
-
-/*
- * Cyrix CPU indexed register access macros
- */
-
-#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
-
-#define setCx86(reg, data) do { \
- outb((reg), 0x22); \
- outb((data), 0x23); \
-} while (0)
-
-#define EISA_bus (0)
-#define MCA_bus (0)
-
-/* from system description table in BIOS. Mostly for MCA use, but
-others may find it useful. */
-extern unsigned int machine_id;
-extern unsigned int machine_submodel_id;
-extern unsigned int BIOS_revision;
-extern unsigned int mca_pentium_flag;
-
-/*
- * User space process size: 3GB (default).
- */
-#define TASK_SIZE (PAGE_OFFSET)
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
-
/*
* Size of io_bitmap in longwords:
* For Xen we support the full 8kbyte IO bitmap but use the io_bitmap_sel field
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
-struct i387_fsave_struct {
- long cwd;
- long swd;
- long twd;
- long fip;
- long fcs;
- long foo;
- long fos;
- long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
- long status; /* software status information */
-};
-
-struct i387_fxsave_struct {
- unsigned short cwd;
- unsigned short swd;
- unsigned short twd;
- unsigned short fop;
- long fip;
- long fcs;
- long foo;
- long fos;
- long mxcsr;
- long reserved;
- long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
- long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
- long padding[56];
+struct i387_state {
+ u8 state[512]; /* big enough for FXSAVE */
} __attribute__ ((aligned (16)));
-struct i387_soft_struct {
- long cwd;
- long swd;
- long twd;
- long fip;
- long fcs;
- long foo;
- long fos;
- long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
- unsigned char ftop, changed, lookahead, no_update, rm, alimit;
- struct info *info;
- unsigned long entry_eip;
-};
-
-union i387_union {
- struct i387_fsave_struct fsave;
- struct i387_fxsave_struct fxsave;
- struct i387_soft_struct soft;
-};
-
typedef struct {
unsigned long seg;
} mm_segment_t;
struct tss_struct {
unsigned short back_link,__blh;
- unsigned long esp0;
- unsigned short ss0,__ss0h;
- unsigned long esp1;
- unsigned short ss1,__ss1h;
- unsigned long esp2;
- unsigned short ss2,__ss2h;
- unsigned long __cr3;
- unsigned long eip;
- unsigned long eflags;
- unsigned long eax,ecx,edx,ebx;
- unsigned long esp;
- unsigned long ebp;
- unsigned long esi;
- unsigned long edi;
- unsigned short es, __esh;
- unsigned short cs, __csh;
- unsigned short ss, __ssh;
- unsigned short ds, __dsh;
- unsigned short fs, __fsh;
- unsigned short gs, __gsh;
- unsigned short ldt, __ldth;
- unsigned short trace, bitmap;
- unsigned long io_bitmap[IO_BITMAP_SIZE+1];
- /*
- * pads the TSS to be cacheline-aligned (total size is 0x2080)
- */
- unsigned long __cacheline_filler[5];
+#ifdef __x86_64__
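+    /*
+     * On x86_64 the hardware TSS holds only the ring 0-2 stack pointers and
+     * the IST array; the register and segment fields in the x86_32 branch
+     * below have no 64-bit counterpart.
+     */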
+ u64 rsp0;
+ u64 rsp1;
+ u64 rsp2;
+ u64 reserved1;
+ u64 ist[7];
+ u64 reserved2;
+ u16 reserved3;
+#else
+ u32 esp0;
+ u16 ss0,__ss0h;
+ u32 esp1;
+ u16 ss1,__ss1h;
+ u32 esp2;
+ u16 ss2,__ss2h;
+ u32 __cr3;
+ u32 eip;
+ u32 eflags;
+ u32 eax,ecx,edx,ebx;
+ u32 esp;
+ u32 ebp;
+ u32 esi;
+ u32 edi;
+ u16 es, __esh;
+ u16 cs, __csh;
+ u16 ss, __ssh;
+ u16 ds, __dsh;
+ u16 fs, __fsh;
+ u16 gs, __gsh;
+ u16 ldt, __ldth;
+ u16 trace;
+#endif
+ u16 bitmap;
+ u32 io_bitmap[IO_BITMAP_SIZE+1];
+ /* Pads the TSS to be cacheline-aligned (total size is 0x2080). */
+ u32 __cacheline_filler[5];
};
struct thread_struct {
- unsigned long guestos_sp, guestos_ss;
+ unsigned long guestos_sp;
+ unsigned long guestos_ss;
/* Hardware debugging registers */
- unsigned long debugreg[8]; /* %%db0-7 debug registers */
+ unsigned long debugreg[8]; /* %%db0-7 debug registers */
/* floating point info */
- union i387_union i387;
+ struct i387_state i387;
/* Trap info. */
- int fast_trap_idx;
- struct desc_struct fast_trap_desc;
- trap_info_t traps[256];
+#ifdef __i386__
+ int fast_trap_idx;
+ struct desc_struct fast_trap_desc;
+#endif
+ trap_info_t traps[256];
};
#define IDT_ENTRIES 256
extern struct desc_struct idt_table[];
extern struct desc_struct *idt_tables[];
+#if defined(__i386__)
+
#define SET_DEFAULT_FAST_TRAP(_p) \
(_p)->fast_trap_idx = 0x20; \
(_p)->fast_trap_desc.a = 0; \
{ [0 ... IO_BITMAP_SIZE] = ~0UL }, /* ioperm */ \
}
+#elif defined(__x86_64__)
+
+#define INIT_THREAD { 0 }
+
+#define INIT_TSS { \
+ 0,0, \
+ 0,0,0,0,{0},0,0, \
+ 0, INVALID_IO_BITMAP_OFFSET, \
+ { [0 ... IO_BITMAP_SIZE] = ~0UL } \
+}
+
+#endif /* __x86_64__ */
+
struct mm_struct {
/*
* Every domain has a L1 pagetable of its own. Per-domain mappings
/* shadow mode status and controls */
unsigned int shadow_mode; /* flags to control shadow table operation */
pagetable_t shadow_table;
- spinlock_t shadow_lock;
+ spinlock_t shadow_lock;
unsigned int shadow_max_page_count; // currently unused
/* shadow hashtable */
unsigned int shadow_fault_count;
unsigned int shadow_dirty_count;
-
/* Current LDT details. */
unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
/* Next entry is passed to LGDT on domain switch. */
- char gdt[6];
+    char gdt[10];            /* NB. LGDT needs 10 bytes on x86_64; only the first 6 are used on x86_32. */
};
static inline void write_ptbase(struct mm_struct *mm)
else
pa = pagetable_val(mm->pagetable);
- __asm__ __volatile__ ( "movl %0, %%cr3" : : "r" (pa) : "memory" );
+ __asm__ __volatile__ ( "mov"__OS" %0, %%cr3" : : "r" (pa) : "memory" );
}
#define IDLE0_MM \
/* Convenient accessor for mm.gdt. */
#define SET_GDT_ENTRIES(_p, _e) ((*(u16 *)((_p)->mm.gdt + 0)) = (_e))
-#define SET_GDT_ADDRESS(_p, _a) ((*(u32 *)((_p)->mm.gdt + 2)) = (_a))
+#define SET_GDT_ADDRESS(_p, _a) ((*(unsigned long *)((_p)->mm.gdt + 2)) = (_a))
#define GET_GDT_ENTRIES(_p) ((*(u16 *)((_p)->mm.gdt + 0)))
-#define GET_GDT_ADDRESS(_p) ((*(u32 *)((_p)->mm.gdt + 2)))
+#define GET_GDT_ADDRESS(_p) ((*(unsigned long *)((_p)->mm.gdt + 2)))
long set_gdt(struct task_struct *p,
unsigned long *frames,
#endif
-#endif /* __ASM_I386_PROCESSOR_H */
+#endif /* __ASM_X86_PROCESSOR_H */
-#ifndef _I386_PTRACE_H
-#define _I386_PTRACE_H
-
-struct pt_regs {
- long ebx;
- long ecx;
- long edx;
- long esi;
- long edi;
- long ebp;
- long eax;
- int xds;
- int xes;
- int xfs;
- int xgs;
- long orig_eax;
- long eip;
- int xcs;
- long eflags;
- long esp;
- int xss;
-};
-
-enum EFLAGS {
- EF_CF = 0x00000001,
- EF_PF = 0x00000004,
- EF_AF = 0x00000010,
- EF_ZF = 0x00000040,
- EF_SF = 0x00000080,
- EF_TF = 0x00000100,
- EF_IE = 0x00000200,
- EF_DF = 0x00000400,
- EF_OF = 0x00000800,
- EF_IOPL = 0x00003000,
- EF_IOPL_RING0 = 0x00000000,
- EF_IOPL_RING1 = 0x00001000,
- EF_IOPL_RING2 = 0x00002000,
- EF_NT = 0x00004000, /* nested task */
- EF_RF = 0x00010000, /* resume */
- EF_VM = 0x00020000, /* virtual mode */
- EF_AC = 0x00040000, /* alignment */
- EF_VIF = 0x00080000, /* virtual interrupt */
- EF_VIP = 0x00100000, /* virtual interrupt pending */
- EF_ID = 0x00200000, /* id */
-};
-
-#ifdef __KERNEL__
-#define user_mode(regs) ((3 & (regs)->xcs))
-#endif
+#ifdef __x86_64__
+#include <asm/x86_64/ptrace.h>
+#else
+#include <asm/x86_32/ptrace.h>
#endif
* Some non intel clones support out of order store. wmb() ceases to be a
* nop for these.
*/
-
+#if defined(__i386__)
#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#define rmb() mb()
-
+#define rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#ifdef CONFIG_X86_OOSTORE
#define wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#else
+#endif
+#elif defined(__x86_64__)
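+/* x86_64 guarantees SSE2, so the dedicated fence instructions are available. */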
+#define mb() __asm__ __volatile__ ("mfence":::"memory")
+#define rmb() __asm__ __volatile__ ("lfence":::"memory")
+#ifdef CONFIG_X86_OOSTORE
+#define wmb() __asm__ __volatile__ ("sfence":::"memory")
+#endif
+#endif
+
+#ifndef CONFIG_X86_OOSTORE
#define wmb() __asm__ __volatile__ ("": : :"memory")
#endif
-#ifndef __i386_UACCESS_H
-#define __i386_UACCESS_H
-/*
- * User space memory access functions
- */
-#include <xen/config.h>
-#include <xen/errno.h>
-#include <xen/sched.h>
-#include <xen/prefetch.h>
-#include <asm/page.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-
-#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
-#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
-
-#define get_ds() (KERNEL_DS)
-#define get_fs() (current->addr_limit)
-#define set_fs(x) (current->addr_limit = (x))
-
-#define segment_eq(a,b) ((a).seg == (b).seg)
-
-extern int __verify_write(const void *, unsigned long);
-
-#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))
-
-/*
- * Uhhuh, this needs 33-bit arithmetic. We have a carry..
- */
-#define __range_ok(addr,size) ({ \
- unsigned long flag,sum; \
- asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
- :"=&r" (flag), "=r" (sum) \
- :"1" (addr),"g" ((int)(size)),"g" (current->addr_limit.seg)); \
- flag; })
-
-#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
-
-static inline int verify_area(int type, const void * addr, unsigned long size)
-{
- return access_ok(type,addr,size) ? 0 : -EFAULT;
-}
-
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise. */
-extern unsigned long search_exception_table(unsigned long);
-
-
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- *
- * This gets kind of ugly. We want to return _two_ values in "get_user()"
- * and yet we don't want to do any pointers, because that is too much
- * of a performance impact. Thus we have a few rather ugly macros here,
- * and hide all the uglyness from the user.
- *
- * The "__xxx" versions of the user access functions are versions that
- * do not verify the address space, that must have been done previously
- * with a separate "access_ok()" call (this is used when we do multiple
- * accesses to the same area of user memory).
- */
-
-extern void __get_user_1(void);
-extern void __get_user_2(void);
-extern void __get_user_4(void);
-
-#define __get_user_x(size,ret,x,ptr) \
- __asm__ __volatile__("call __get_user_" #size \
- :"=a" (ret),"=d" (x) \
- :"0" (ptr))
-
-/* Careful: we have to cast the result to the type of the pointer for sign reasons */
-#define get_user(x,ptr) \
-({ int __ret_gu=1,__val_gu; \
- switch(sizeof (*(ptr))) { \
- case 1: __ret_gu=copy_from_user(&__val_gu,ptr,1); break; \
- case 2: __ret_gu=copy_from_user(&__val_gu,ptr,2); break; \
- case 4: __ret_gu=copy_from_user(&__val_gu,ptr,4); break; \
- default: __ret_gu=copy_from_user(&__val_gu,ptr,8); break; \
- /*case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break;*/ \
- /*case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break;*/ \
- /*case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break;*/ \
- /*default: __get_user_x(X,__ret_gu,__val_gu,ptr); break;*/ \
- } \
- (x) = (__typeof__(*(ptr)))__val_gu; \
- __ret_gu; \
-})
-
-extern void __put_user_1(void);
-extern void __put_user_2(void);
-extern void __put_user_4(void);
-extern void __put_user_8(void);
-
-extern void __put_user_bad(void);
-
-#define put_user(x,ptr) \
- __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-
-#define __get_user(x,ptr) \
- __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
-#define __put_user(x,ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-
-#define __put_user_nocheck(x,ptr,size) \
-({ \
- long __pu_err; \
- __put_user_size((x),(ptr),(size),__pu_err); \
- __pu_err; \
-})
-
-
-#define __put_user_check(x,ptr,size) \
-({ \
- long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) *__pu_addr = (ptr); \
- if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
- __put_user_size((x),__pu_addr,(size),__pu_err); \
- __pu_err; \
-})
-
-#define __put_user_u64(x, addr, err) \
- __asm__ __volatile__( \
- "1: movl %%eax,0(%2)\n" \
- "2: movl %%edx,4(%2)\n" \
- "3:\n" \
- ".section .fixup,\"ax\"\n" \
- "4: movl %3,%0\n" \
- " jmp 3b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,4b\n" \
- " .long 2b,4b\n" \
- ".previous" \
- : "=r"(err) \
- : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))
-
-#define __put_user_size(x,ptr,size,retval) \
-do { \
- retval = 0; \
- switch (size) { \
- case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break; \
- case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break; \
- case 4: __put_user_asm(x,ptr,retval,"l","","ir"); break; \
- case 8: __put_user_u64(x,ptr,retval); break; \
- default: __put_user_bad(); \
- } \
-} while (0)
-
-struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct *)(x))
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-#define __put_user_asm(x, addr, err, itype, rtype, ltype) \
- __asm__ __volatile__( \
- "1: mov"itype" %"rtype"1,%2\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: movl %3,%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,3b\n" \
- ".previous" \
- : "=r"(err) \
- : ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))
-
-
-#define __get_user_nocheck(x,ptr,size) \
-({ \
- long __gu_err, __gu_val; \
- __get_user_size(__gu_val,(ptr),(size),__gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
-})
-
-extern long __get_user_bad(void);
-
-#define __get_user_size(x,ptr,size,retval) \
-do { \
- retval = 0; \
- switch (size) { \
- case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); break; \
- case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); break; \
- case 4: __get_user_asm(x,ptr,retval,"l","","=r"); break; \
- default: (x) = __get_user_bad(); \
- } \
-} while (0)
-
-#define __get_user_asm(x, addr, err, itype, rtype, ltype) \
- __asm__ __volatile__( \
- "1: mov"itype" %2,%"rtype"1\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: movl %3,%0\n" \
- " xor"itype" %"rtype"1,%"rtype"1\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,3b\n" \
- ".previous" \
- : "=r"(err), ltype (x) \
- : "m"(__m(addr)), "i"(-EFAULT), "0"(err))
-
-
-/*
- * Copy To/From Userspace
- */
-
-/* Generic arbitrary sized copy. */
-#define __copy_user(to,from,size) \
-do { \
- int __d0, __d1; \
- __asm__ __volatile__( \
- "0: rep; movsl\n" \
- " movl %3,%0\n" \
- "1: rep; movsb\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: lea 0(%3,%0,4),%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,2b\n" \
- ".previous" \
- : "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
- : "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
- : "memory"); \
-} while (0)
-
-#define __copy_user_zeroing(to,from,size) \
-do { \
- int __d0, __d1; \
- __asm__ __volatile__( \
- "0: rep; movsl\n" \
- " movl %3,%0\n" \
- "1: rep; movsb\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: lea 0(%3,%0,4),%0\n" \
- "4: pushl %0\n" \
- " pushl %%eax\n" \
- " xorl %%eax,%%eax\n" \
- " rep; stosb\n" \
- " popl %%eax\n" \
- " popl %0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,4b\n" \
- ".previous" \
- : "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
- : "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
- : "memory"); \
-} while (0)
-
-/* We let the __ versions of copy_from/to_user inline, because they're often
- * used in fast paths and have only a small space overhead.
- */
-static inline unsigned long
-__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
-{
- __copy_user_zeroing(to,from,n);
- return n;
-}
-
-static inline unsigned long
-__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
-{
- __copy_user(to,from,n);
- return n;
-}
-
-
-/* Optimize just a little bit when we know the size of the move. */
-#define __constant_copy_user(to, from, size) \
-do { \
- int __d0, __d1; \
- switch (size & 3) { \
- default: \
- __asm__ __volatile__( \
- "0: rep; movsl\n" \
- "1:\n" \
- ".section .fixup,\"ax\"\n" \
- "2: shl $2,%0\n" \
- " jmp 1b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,2b\n" \
- ".previous" \
- : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
- : "1"(from), "2"(to), "0"(size/4) \
- : "memory"); \
- break; \
- case 1: \
- __asm__ __volatile__( \
- "0: rep; movsl\n" \
- "1: movsb\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: shl $2,%0\n" \
- "4: incl %0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,4b\n" \
- ".previous" \
- : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
- : "1"(from), "2"(to), "0"(size/4) \
- : "memory"); \
- break; \
- case 2: \
- __asm__ __volatile__( \
- "0: rep; movsl\n" \
- "1: movsw\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: shl $2,%0\n" \
- "4: addl $2,%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,4b\n" \
- ".previous" \
- : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
- : "1"(from), "2"(to), "0"(size/4) \
- : "memory"); \
- break; \
- case 3: \
- __asm__ __volatile__( \
- "0: rep; movsl\n" \
- "1: movsw\n" \
- "2: movsb\n" \
- "3:\n" \
- ".section .fixup,\"ax\"\n" \
- "4: shl $2,%0\n" \
- "5: addl $2,%0\n" \
- "6: incl %0\n" \
- " jmp 3b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,4b\n" \
- " .long 1b,5b\n" \
- " .long 2b,6b\n" \
- ".previous" \
- : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
- : "1"(from), "2"(to), "0"(size/4) \
- : "memory"); \
- break; \
- } \
-} while (0)
-
-/* Optimize just a little bit when we know the size of the move. */
-#define __constant_copy_user_zeroing(to, from, size) \
-do { \
- int __d0, __d1; \
- switch (size & 3) { \
- default: \
- __asm__ __volatile__( \
- "0: rep; movsl\n" \
- "1:\n" \
- ".section .fixup,\"ax\"\n" \
- "2: pushl %0\n" \
- " pushl %%eax\n" \
- " xorl %%eax,%%eax\n" \
- " rep; stosl\n" \
- " popl %%eax\n" \
- " popl %0\n" \
- " shl $2,%0\n" \
- " jmp 1b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,2b\n" \
- ".previous" \
- : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
- : "1"(from), "2"(to), "0"(size/4) \
- : "memory"); \
- break; \
- case 1: \
- __asm__ __volatile__( \
- "0: rep; movsl\n" \
- "1: movsb\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: pushl %0\n" \
- " pushl %%eax\n" \
- " xorl %%eax,%%eax\n" \
- " rep; stosl\n" \
- " stosb\n" \
- " popl %%eax\n" \
- " popl %0\n" \
- " shl $2,%0\n" \
- " incl %0\n" \
- " jmp 2b\n" \
- "4: pushl %%eax\n" \
- " xorl %%eax,%%eax\n" \
- " stosb\n" \
- " popl %%eax\n" \
- " incl %0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,4b\n" \
- ".previous" \
- : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
- : "1"(from), "2"(to), "0"(size/4) \
- : "memory"); \
- break; \
- case 2: \
- __asm__ __volatile__( \
- "0: rep; movsl\n" \
- "1: movsw\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: pushl %0\n" \
- " pushl %%eax\n" \
- " xorl %%eax,%%eax\n" \
- " rep; stosl\n" \
- " stosw\n" \
- " popl %%eax\n" \
- " popl %0\n" \
- " shl $2,%0\n" \
- " addl $2,%0\n" \
- " jmp 2b\n" \
- "4: pushl %%eax\n" \
- " xorl %%eax,%%eax\n" \
- " stosw\n" \
- " popl %%eax\n" \
- " addl $2,%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,4b\n" \
- ".previous" \
- : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
- : "1"(from), "2"(to), "0"(size/4) \
- : "memory"); \
- break; \
- case 3: \
- __asm__ __volatile__( \
- "0: rep; movsl\n" \
- "1: movsw\n" \
- "2: movsb\n" \
- "3:\n" \
- ".section .fixup,\"ax\"\n" \
- "4: pushl %0\n" \
- " pushl %%eax\n" \
- " xorl %%eax,%%eax\n" \
- " rep; stosl\n" \
- " stosw\n" \
- " stosb\n" \
- " popl %%eax\n" \
- " popl %0\n" \
- " shl $2,%0\n" \
- " addl $3,%0\n" \
- " jmp 2b\n" \
- "5: pushl %%eax\n" \
- " xorl %%eax,%%eax\n" \
- " stosw\n" \
- " stosb\n" \
- " popl %%eax\n" \
- " addl $3,%0\n" \
- " jmp 2b\n" \
- "6: pushl %%eax\n" \
- " xorl %%eax,%%eax\n" \
- " stosb\n" \
- " popl %%eax\n" \
- " incl %0\n" \
- " jmp 3b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,4b\n" \
- " .long 1b,5b\n" \
- " .long 2b,6b\n" \
- ".previous" \
- : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
- : "1"(from), "2"(to), "0"(size/4) \
- : "memory"); \
- break; \
- } \
-} while (0)
-
-unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
-unsigned long __generic_copy_from_user(void *, const void *, unsigned long);
-
-static inline unsigned long
-__constant_copy_to_user(void *to, const void *from, unsigned long n)
-{
- prefetch(from);
- if (access_ok(VERIFY_WRITE, to, n))
- __constant_copy_user(to,from,n);
- return n;
-}
-
-static inline unsigned long
-__constant_copy_from_user(void *to, const void *from, unsigned long n)
-{
- if (access_ok(VERIFY_READ, from, n))
- __constant_copy_user_zeroing(to,from,n);
- else
- memset(to, 0, n);
- return n;
-}
-
-static inline unsigned long
-__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
-{
- __constant_copy_user(to,from,n);
- return n;
-}
-
-static inline unsigned long
-__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
-{
- __constant_copy_user_zeroing(to,from,n);
- return n;
-}
-
-#define copy_to_user(to,from,n) \
- (__builtin_constant_p(n) ? \
- __constant_copy_to_user((to),(from),(n)) : \
- __generic_copy_to_user((to),(from),(n)))
-
-#define copy_from_user(to,from,n) \
- (__builtin_constant_p(n) ? \
- __constant_copy_from_user((to),(from),(n)) : \
- __generic_copy_from_user((to),(from),(n)))
-
-#define __copy_to_user(to,from,n) \
- (__builtin_constant_p(n) ? \
- __constant_copy_to_user_nocheck((to),(from),(n)) : \
- __generic_copy_to_user_nocheck((to),(from),(n)))
-
-#define __copy_from_user(to,from,n) \
- (__builtin_constant_p(n) ? \
- __constant_copy_from_user_nocheck((to),(from),(n)) : \
- __generic_copy_from_user_nocheck((to),(from),(n)))
-
-long strncpy_from_user(char *dst, const char *src, long count);
-long __strncpy_from_user(char *dst, const char *src, long count);
-#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
-long strnlen_user(const char *str, long n);
-unsigned long clear_user(void *mem, unsigned long len);
-unsigned long __clear_user(void *mem, unsigned long len);
-
-#endif /* __i386_UACCESS_H */
+#ifdef __x86_64__
+#include <asm/x86_64/uaccess.h>
+#else
+#include <asm/x86_32/uaccess.h>
+#endif
--- /dev/null
+#ifndef _X86_CURRENT_H
+#define _X86_CURRENT_H
+
+struct task_struct;
+
+#define STACK_RESERVED \
+ (sizeof(execution_context_t) + sizeof(struct task_struct *))
+
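+/*
+ * The pointer to the current task_struct is stashed in the last word of the
+ * per-CPU stack. Assuming the stack is STACK_SIZE-aligned (with STACK_SIZE a
+ * power of two), OR-ing ESP with (STACK_SIZE-4) and clearing the bottom two
+ * bits yields the address of that word.
+ */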
+static inline struct task_struct * get_current(void)
+{
+ struct task_struct *current;
+ __asm__ ( "orl %%esp,%0; andl $~3,%0; movl (%0),%0"
+ : "=r" (current) : "0" (STACK_SIZE-4) );
+ return current;
+}
+
+#define current get_current()
+
+static inline void set_current(struct task_struct *p)
+{
+ __asm__ ( "orl %%esp,%0; andl $~3,%0; movl %1,(%0)"
+ : : "r" (STACK_SIZE-4), "r" (p) );
+}
+
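+/*
+ * The guest register frame (execution_context_t) occupies the STACK_RESERVED
+ * region at the top of the stack, immediately below the saved task pointer.
+ */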
+static inline execution_context_t *get_execution_context(void)
+{
+ execution_context_t *execution_context;
+ __asm__ ( "andl %%esp,%0; addl %2,%0"
+ : "=r" (execution_context)
+ : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) );
+ return execution_context;
+}
+
+static inline unsigned long get_stack_top(void)
+{
+ unsigned long p;
+ __asm__ ( "orl %%esp,%0; andl $~3,%0"
+ : "=r" (p) : "0" (STACK_SIZE-4) );
+ return p;
+}
+
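+/*
+ * Reset ESP to just below the reserved region at the top of the current
+ * stack, then jump directly to the idle or non-idle continuation for _p.
+ */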
+#define schedule_tail(_p) \
+ __asm__ __volatile__ ( \
+ "andl %%esp,%0; addl %2,%0; movl %0,%%esp; jmp *%1" \
+ : : "r" (~(STACK_SIZE-1)), \
+ "r" (unlikely(is_idle_task((_p))) ? \
+ continue_cpu_idle_loop : \
+ continue_nonidle_task), \
+ "i" (STACK_SIZE-STACK_RESERVED) )
+
+
+#endif /* _X86_CURRENT_H */
--- /dev/null
+#ifndef _I386_PTRACE_H
+#define _I386_PTRACE_H
+
+struct pt_regs {
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ int xds;
+ int xes;
+ int xfs;
+ int xgs;
+ long orig_eax;
+ long eip;
+ int xcs;
+ long eflags;
+ long esp;
+ int xss;
+};
+
+enum EFLAGS {
+ EF_CF = 0x00000001,
+ EF_PF = 0x00000004,
+ EF_AF = 0x00000010,
+ EF_ZF = 0x00000040,
+ EF_SF = 0x00000080,
+ EF_TF = 0x00000100,
+ EF_IE = 0x00000200,
+ EF_DF = 0x00000400,
+ EF_OF = 0x00000800,
+ EF_IOPL = 0x00003000,
+ EF_IOPL_RING0 = 0x00000000,
+ EF_IOPL_RING1 = 0x00001000,
+ EF_IOPL_RING2 = 0x00002000,
+ EF_NT = 0x00004000, /* nested task */
+ EF_RF = 0x00010000, /* resume */
+ EF_VM = 0x00020000, /* virtual mode */
+ EF_AC = 0x00040000, /* alignment */
+ EF_VIF = 0x00080000, /* virtual interrupt */
+ EF_VIP = 0x00100000, /* virtual interrupt pending */
+ EF_ID = 0x00200000, /* id */
+};
+
+#ifdef __KERNEL__
+#define user_mode(regs) ((3 & (regs)->xcs))
+#endif
+
+#endif
--- /dev/null
+#ifndef __i386_UACCESS_H
+#define __i386_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+#include <xen/prefetch.h>
+#include <asm/page.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+
+#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
+#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current->addr_limit)
+#define set_fs(x) (current->addr_limit = (x))
+
+#define segment_eq(a,b) ((a).seg == (b).seg)
+
+extern int __verify_write(const void *, unsigned long);
+
+#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))
+
+/*
+ * Uhhuh, this needs 33-bit arithmetic. We have a carry..
+ */
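+/*
+ * addl leaves any carry from addr+size in CF; the first sbbl turns that into
+ * an all-ones flag, and the cmpl/sbbl pair also sets the flag when the sum
+ * exceeds addr_limit. access_ok() then simply tests for flag == 0.
+ */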
+#define __range_ok(addr,size) ({ \
+ unsigned long flag,sum; \
+ asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
+ :"=&r" (flag), "=r" (sum) \
+ :"1" (addr),"g" ((int)(size)),"g" (current->addr_limit.seg)); \
+ flag; })
+
+#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
+
+static inline int verify_area(int type, const void * addr, unsigned long size)
+{
+ return access_ok(type,addr,size) ? 0 : -EFAULT;
+}
+
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
+/* Returns 0 if exception not found and fixup otherwise. */
+extern unsigned long search_exception_table(unsigned long);
+
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ */
+
+extern void __get_user_1(void);
+extern void __get_user_2(void);
+extern void __get_user_4(void);
+
+#define __get_user_x(size,ret,x,ptr) \
+ __asm__ __volatile__("call __get_user_" #size \
+ :"=a" (ret),"=d" (x) \
+ :"0" (ptr))
+
+/* Careful: we have to cast the result to the type of the pointer for sign reasons */
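+/*
+ * NB. Every size is currently funnelled through copy_from_user(); the
+ * fixed-size __get_user_N fast paths are left commented out below.
+ */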
+#define get_user(x,ptr) \
+({ int __ret_gu=1,__val_gu; \
+ switch(sizeof (*(ptr))) { \
+ case 1: __ret_gu=copy_from_user(&__val_gu,ptr,1); break; \
+ case 2: __ret_gu=copy_from_user(&__val_gu,ptr,2); break; \
+ case 4: __ret_gu=copy_from_user(&__val_gu,ptr,4); break; \
+ default: __ret_gu=copy_from_user(&__val_gu,ptr,8); break; \
+ /*case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break;*/ \
+ /*case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break;*/ \
+ /*case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break;*/ \
+ /*default: __get_user_x(X,__ret_gu,__val_gu,ptr); break;*/ \
+ } \
+ (x) = (__typeof__(*(ptr)))__val_gu; \
+ __ret_gu; \
+})
+
+extern void __put_user_1(void);
+extern void __put_user_2(void);
+extern void __put_user_4(void);
+extern void __put_user_8(void);
+
+extern void __put_user_bad(void);
+
+#define put_user(x,ptr) \
+ __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+
+#define __get_user(x,ptr) \
+ __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __put_user(x,ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+
+#define __put_user_nocheck(x,ptr,size) \
+({ \
+ long __pu_err; \
+ __put_user_size((x),(ptr),(size),__pu_err); \
+ __pu_err; \
+})
+
+
+#define __put_user_check(x,ptr,size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
+ __put_user_size((x),__pu_addr,(size),__pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_u64(x, addr, err) \
+ __asm__ __volatile__( \
+ "1: movl %%eax,0(%2)\n" \
+ "2: movl %%edx,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: movl %3,%0\n" \
+ " jmp 3b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,4b\n" \
+ " .long 2b,4b\n" \
+ ".previous" \
+ : "=r"(err) \
+ : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))
+
+#define __put_user_size(x,ptr,size,retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break; \
+ case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break; \
+ case 4: __put_user_asm(x,ptr,retval,"l","","ir"); break; \
+ case 8: __put_user_u64(x,ptr,retval); break; \
+ default: __put_user_bad(); \
+ } \
+} while (0)
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct *)(x))
+
+/*
+ * Tell gcc we read from memory instead of writing: this is because
+ * we do not write to any memory gcc knows about, so there are no
+ * aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, itype, rtype, ltype) \
+ __asm__ __volatile__( \
+ "1: mov"itype" %"rtype"1,%2\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl %3,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,3b\n" \
+ ".previous" \
+ : "=r"(err) \
+ : ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))
+
+
+#define __get_user_nocheck(x,ptr,size) \
+({ \
+ long __gu_err, __gu_val; \
+ __get_user_size(__gu_val,(ptr),(size),__gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_size(x,ptr,size,retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); break; \
+ case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); break; \
+ case 4: __get_user_asm(x,ptr,retval,"l","","=r"); break; \
+ default: (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+#define __get_user_asm(x, addr, err, itype, rtype, ltype) \
+ __asm__ __volatile__( \
+ "1: mov"itype" %2,%"rtype"1\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl %3,%0\n" \
+ " xor"itype" %"rtype"1,%"rtype"1\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,3b\n" \
+ ".previous" \
+ : "=r"(err), ltype (x) \
+ : "m"(__m(addr)), "i"(-EFAULT), "0"(err))
+
+
+/*
+ * Copy To/From Userspace
+ */
+
+/* Generic arbitrary sized copy. */
+#define __copy_user(to,from,size) \
+do { \
+ int __d0, __d1; \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ " movl %3,%0\n" \
+ "1: rep; movsb\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: lea 0(%3,%0,4),%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,3b\n" \
+ " .long 1b,2b\n" \
+ ".previous" \
+ : "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
+ : "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
+ : "memory"); \
+} while (0)
+
+#define __copy_user_zeroing(to,from,size) \
+do { \
+ int __d0, __d1; \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ " movl %3,%0\n" \
+ "1: rep; movsb\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: lea 0(%3,%0,4),%0\n" \
+ "4: pushl %0\n" \
+ " pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " rep; stosb\n" \
+ " popl %%eax\n" \
+ " popl %0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,3b\n" \
+ " .long 1b,4b\n" \
+ ".previous" \
+ : "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
+ : "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
+ : "memory"); \
+} while (0)
+
+/* The __ versions of copy_from/to_user are inline because they're often
+ * used in fast paths and have only a small space overhead.
+ */
+static inline unsigned long
+__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
+{
+ __copy_user_zeroing(to,from,n);
+ return n;
+}
+
+static inline unsigned long
+__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
+{
+ __copy_user(to,from,n);
+ return n;
+}
+
+
+/* Optimize just a little bit when we know the size of the move. */
+#define __constant_copy_user(to, from, size) \
+do { \
+ int __d0, __d1; \
+ switch (size & 3) { \
+ default: \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ "1:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "2: shl $2,%0\n" \
+ " jmp 1b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,2b\n" \
+ ".previous" \
+ : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
+ : "1"(from), "2"(to), "0"(size/4) \
+ : "memory"); \
+ break; \
+ case 1: \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ "1: movsb\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: shl $2,%0\n" \
+ "4: incl %0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,3b\n" \
+ " .long 1b,4b\n" \
+ ".previous" \
+ : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
+ : "1"(from), "2"(to), "0"(size/4) \
+ : "memory"); \
+ break; \
+ case 2: \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ "1: movsw\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: shl $2,%0\n" \
+ "4: addl $2,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,3b\n" \
+ " .long 1b,4b\n" \
+ ".previous" \
+ : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
+ : "1"(from), "2"(to), "0"(size/4) \
+ : "memory"); \
+ break; \
+ case 3: \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ "1: movsw\n" \
+ "2: movsb\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: shl $2,%0\n" \
+ "5: addl $2,%0\n" \
+ "6: incl %0\n" \
+ " jmp 3b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,4b\n" \
+ " .long 1b,5b\n" \
+ " .long 2b,6b\n" \
+ ".previous" \
+ : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
+ : "1"(from), "2"(to), "0"(size/4) \
+ : "memory"); \
+ break; \
+ } \
+} while (0)
+
+/* Optimize just a little bit when we know the size of the move. */
+#define __constant_copy_user_zeroing(to, from, size) \
+do { \
+ int __d0, __d1; \
+ switch (size & 3) { \
+ default: \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ "1:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "2: pushl %0\n" \
+ " pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " rep; stosl\n" \
+ " popl %%eax\n" \
+ " popl %0\n" \
+ " shl $2,%0\n" \
+ " jmp 1b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,2b\n" \
+ ".previous" \
+ : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
+ : "1"(from), "2"(to), "0"(size/4) \
+ : "memory"); \
+ break; \
+ case 1: \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ "1: movsb\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: pushl %0\n" \
+ " pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " rep; stosl\n" \
+ " stosb\n" \
+ " popl %%eax\n" \
+ " popl %0\n" \
+ " shl $2,%0\n" \
+ " incl %0\n" \
+ " jmp 2b\n" \
+ "4: pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " stosb\n" \
+ " popl %%eax\n" \
+ " incl %0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,3b\n" \
+ " .long 1b,4b\n" \
+ ".previous" \
+ : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
+ : "1"(from), "2"(to), "0"(size/4) \
+ : "memory"); \
+ break; \
+ case 2: \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ "1: movsw\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: pushl %0\n" \
+ " pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " rep; stosl\n" \
+ " stosw\n" \
+ " popl %%eax\n" \
+ " popl %0\n" \
+ " shl $2,%0\n" \
+ " addl $2,%0\n" \
+ " jmp 2b\n" \
+ "4: pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " stosw\n" \
+ " popl %%eax\n" \
+ " addl $2,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,3b\n" \
+ " .long 1b,4b\n" \
+ ".previous" \
+ : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
+ : "1"(from), "2"(to), "0"(size/4) \
+ : "memory"); \
+ break; \
+ case 3: \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ "1: movsw\n" \
+ "2: movsb\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: pushl %0\n" \
+ " pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " rep; stosl\n" \
+ " stosw\n" \
+ " stosb\n" \
+ " popl %%eax\n" \
+ " popl %0\n" \
+ " shl $2,%0\n" \
+ " addl $3,%0\n" \
+ " jmp 2b\n" \
+ "5: pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " stosw\n" \
+ " stosb\n" \
+ " popl %%eax\n" \
+ " addl $3,%0\n" \
+ " jmp 2b\n" \
+ "6: pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " stosb\n" \
+ " popl %%eax\n" \
+ " incl %0\n" \
+ " jmp 3b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,4b\n" \
+ " .long 1b,5b\n" \
+ " .long 2b,6b\n" \
+ ".previous" \
+ : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
+ : "1"(from), "2"(to), "0"(size/4) \
+ : "memory"); \
+ break; \
+ } \
+} while (0)
+
+unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
+unsigned long __generic_copy_from_user(void *, const void *, unsigned long);
+
+static inline unsigned long
+__constant_copy_to_user(void *to, const void *from, unsigned long n)
+{
+ prefetch(from);
+ if (access_ok(VERIFY_WRITE, to, n))
+ __constant_copy_user(to,from,n);
+ return n;
+}
+
+static inline unsigned long
+__constant_copy_from_user(void *to, const void *from, unsigned long n)
+{
+ if (access_ok(VERIFY_READ, from, n))
+ __constant_copy_user_zeroing(to,from,n);
+ else
+ memset(to, 0, n);
+ return n;
+}
+
+static inline unsigned long
+__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
+{
+ __constant_copy_user(to,from,n);
+ return n;
+}
+
+static inline unsigned long
+__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
+{
+ __constant_copy_user_zeroing(to,from,n);
+ return n;
+}
+
+#define copy_to_user(to,from,n) \
+ (__builtin_constant_p(n) ? \
+ __constant_copy_to_user((to),(from),(n)) : \
+ __generic_copy_to_user((to),(from),(n)))
+
+#define copy_from_user(to,from,n) \
+ (__builtin_constant_p(n) ? \
+ __constant_copy_from_user((to),(from),(n)) : \
+ __generic_copy_from_user((to),(from),(n)))
+
+#define __copy_to_user(to,from,n) \
+ (__builtin_constant_p(n) ? \
+ __constant_copy_to_user_nocheck((to),(from),(n)) : \
+ __generic_copy_to_user_nocheck((to),(from),(n)))
+
+#define __copy_from_user(to,from,n) \
+ (__builtin_constant_p(n) ? \
+ __constant_copy_from_user_nocheck((to),(from),(n)) : \
+ __generic_copy_from_user_nocheck((to),(from),(n)))
+
+long strncpy_from_user(char *dst, const char *src, long count);
+long __strncpy_from_user(char *dst, const char *src, long count);
+#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
+long strnlen_user(const char *str, long n);
+unsigned long clear_user(void *mem, unsigned long len);
+unsigned long __clear_user(void *mem, unsigned long len);
+
+#endif /* __i386_UACCESS_H */
+++ /dev/null
-/*
- * include/asm-x86_64/processor.h
- *
- * Copyright (C) 1994 Linus Torvalds
- */
-
-#ifndef __ASM_X86_64_PROCESSOR_H
-#define __ASM_X86_64_PROCESSOR_H
-
-#include <asm/page.h>
-#include <asm/types.h>
-#include <asm/cpufeature.h>
-#include <asm/desc.h>
-#include <xen/config.h>
-#include <hypervisor-ifs/hypervisor-if.h>
-
-struct task_struct;
-
-#define TF_MASK 0x00000100
-#define IF_MASK 0x00000200
-#define IOPL_MASK 0x00003000
-#define NT_MASK 0x00004000
-#define VM_MASK 0x00020000
-#define AC_MASK 0x00040000
-#define VIF_MASK 0x00080000 /* virtual interrupt flag */
-#define VIP_MASK 0x00100000 /* virtual interrupt pending */
-#define ID_MASK 0x00200000
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
-
-/*
- * CPU type and hardware bug flags. Kept separately for each CPU.
- * Members of this structure are referenced in head.S, so think twice
- * before touching them. [mj]
- */
-
-struct cpuinfo_x86 {
- __u8 x86; /* CPU family */
- __u8 x86_vendor; /* CPU vendor */
- __u8 x86_model;
- __u8 x86_mask;
- int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
- __u32 x86_capability[NCAPINTS];
- char x86_vendor_id[16];
- char x86_model_id[64];
- int x86_cache_size; /* in KB - valid for CPUS which support this
- call */
- int x86_clflush_size;
- int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/
- __u8 x86_virt_bits, x86_phys_bits;
- __u32 x86_power;
- unsigned long loops_per_jiffy;
-} ____cacheline_aligned;
-
-#define X86_VENDOR_INTEL 0
-#define X86_VENDOR_CYRIX 1
-#define X86_VENDOR_AMD 2
-#define X86_VENDOR_UMC 3
-#define X86_VENDOR_NEXGEN 4
-#define X86_VENDOR_CENTAUR 5
-#define X86_VENDOR_RISE 6
-#define X86_VENDOR_TRANSMETA 7
-#define X86_VENDOR_UNKNOWN 0xff
-
-/*
- * capabilities of CPUs
- */
-
-extern struct cpuinfo_x86 boot_cpu_data;
-extern struct tss_struct init_tss[NR_CPUS];
-
-#ifdef CONFIG_SMP
-extern struct cpuinfo_x86 cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
-#else
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
-#endif
-
-#define cpu_has_pge 1
-#define cpu_has_pse 1
-#define cpu_has_pae 1
-#define cpu_has_tsc 1
-#define cpu_has_de 1
-#define cpu_has_vme 1
-#define cpu_has_fxsr 1
-#define cpu_has_xmm 1
-#define cpu_has_apic (test_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability))
-
-extern char ignore_irq13;
-
-extern void identify_cpu(struct cpuinfo_x86 *);
-extern void print_cpu_info(struct cpuinfo_x86 *);
-extern void dodgy_tsc(void);
-
-/*
- * EFLAGS bits
- */
-#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
-#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */
-#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
-#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
-#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
-#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
-#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
-#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
-#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
-#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
-#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
-#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
-#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
-#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
-#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
-#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
-
-/*
- * Generic CPUID function
- * FIXME: This really belongs to msr.h
- */
-extern inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
-{
- __asm__("cpuid"
- : "=a" (*eax),
- "=b" (*ebx),
- "=c" (*ecx),
- "=d" (*edx)
- : "0" (op));
-}
-
-/*
- * CPUID functions returning a single datum
- */
-extern inline unsigned int cpuid_eax(unsigned int op)
-{
- unsigned int eax;
-
- __asm__("cpuid"
- : "=a" (eax)
- : "0" (op)
- : "bx", "cx", "dx");
- return eax;
-}
-extern inline unsigned int cpuid_ebx(unsigned int op)
-{
- unsigned int eax, ebx;
-
- __asm__("cpuid"
- : "=a" (eax), "=b" (ebx)
- : "0" (op)
- : "cx", "dx" );
- return ebx;
-}
-extern inline unsigned int cpuid_ecx(unsigned int op)
-{
- unsigned int eax, ecx;
-
- __asm__("cpuid"
- : "=a" (eax), "=c" (ecx)
- : "0" (op)
- : "bx", "dx" );
- return ecx;
-}
-extern inline unsigned int cpuid_edx(unsigned int op)
-{
- unsigned int eax, edx;
-
- __asm__("cpuid"
- : "=a" (eax), "=d" (edx)
- : "0" (op)
- : "bx", "cx");
- return edx;
-}
-
-
-/*
- * Intel CPU flags in CR0
- */
-#define X86_CR0_PE 0x00000001 /* Enable Protected Mode (RW) */
-#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor (RW) */
-#define X86_CR0_EM 0x00000004 /* Require FPU Emulation (RO) */
-#define X86_CR0_TS 0x00000008 /* Task Switched (RW) */
-#define X86_CR0_NE 0x00000020 /* Numeric Error Reporting (RW) */
-#define X86_CR0_WP 0x00010000 /* Supervisor Write Protect (RW) */
-#define X86_CR0_AM 0x00040000 /* Alignment Checking (RW) */
-#define X86_CR0_NW 0x20000000 /* Not Write-Through (RW) */
-#define X86_CR0_CD 0x40000000 /* Cache Disable (RW) */
-#define X86_CR0_PG 0x80000000 /* Paging (RW) */
-
-#define read_cr0() ({ \
- unsigned long __dummy; \
- __asm__( \
- "movq %%cr0,%0\n\t" \
- :"=r" (__dummy)); \
- __dummy; \
-})
-
-#define write_cr0(x) \
- __asm__("movq %0,%%cr0": :"r" (x));
-
-
-
-/*
- * Intel CPU features in CR4
- */
-#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
-#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
-#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
-#define X86_CR4_DE 0x0008 /* enable debugging extensions */
-#define X86_CR4_PSE 0x0010 /* enable page size extensions */
-#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
-#define X86_CR4_MCE 0x0040 /* Machine check enable */
-#define X86_CR4_PGE 0x0080 /* enable global pages */
-#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
-#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
-#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
-
-/*
- * Save the cr4 feature set we're using (ie
- * Pentium 4MB enable and PPro Global page
- * enable), so that any CPU's that boot up
- * after us can get the correct flags.
- */
-extern unsigned long mmu_cr4_features;
-
-static inline void set_in_cr4 (unsigned long mask)
-{
- mmu_cr4_features |= mask;
- __asm__("movq %%cr4,%%rax\n\t"
- "orq %0,%%rax\n\t"
- "movq %%rax,%%cr4\n"
- : : "irg" (mask)
- :"ax");
-}
-
-static inline void clear_in_cr4 (unsigned long mask)
-{
- mmu_cr4_features &= ~mask;
- __asm__("movq %%cr4,%%rax\n\t"
- "andq %0,%%rax\n\t"
- "movq %%rax,%%cr4\n"
- : : "irg" (~mask)
- :"ax");
-}
-
-/*
- * Cyrix CPU configuration register indexes
- */
-#define CX86_CCR0 0xc0
-#define CX86_CCR1 0xc1
-#define CX86_CCR2 0xc2
-#define CX86_CCR3 0xc3
-#define CX86_CCR4 0xe8
-#define CX86_CCR5 0xe9
-#define CX86_CCR6 0xea
-#define CX86_CCR7 0xeb
-#define CX86_DIR0 0xfe
-#define CX86_DIR1 0xff
-#define CX86_ARR_BASE 0xc4
-#define CX86_RCR_BASE 0xdc
-
-/*
- * Cyrix CPU indexed register access macros
- */
-
-#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
-
-#define setCx86(reg, data) do { \
- outb((reg), 0x22); \
- outb((data), 0x23); \
-} while (0)
-
-/*
- * Bus types
- */
-#define EISA_bus 0
-#define MCA_bus 0
-#define MCA_bus__is_a_macro
-
-
-/*
- * User space process size: 512GB - 1GB (default).
- */
-#define TASK_SIZE (0x0000007fc0000000)
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_32 0xa0000000
-#define TASK_UNMAPPED_64 (TASK_SIZE/3)
-#define TASK_UNMAPPED_BASE \
- ((current->thread.flags & THREAD_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
-
-/*
- * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
- */
-#define IO_BITMAP_SIZE 32
-#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-#define INVALID_IO_BITMAP_OFFSET 0x8000
-
-struct i387_fxsave_struct {
- u16 cwd;
- u16 swd;
- u16 twd;
- u16 fop;
- u64 rip;
- u64 rdp;
- u32 mxcsr;
- u32 mxcsr_mask;
- u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
- u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
- u32 padding[24];
-} __attribute__ ((aligned (16)));
-
-union i387_union {
- struct i387_fxsave_struct fxsave;
-};
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-struct tss_struct {
- unsigned short back_link,__blh;
-/* u32 reserved1; */
- u64 rsp0;
- u64 rsp1;
- u64 rsp2;
- u64 reserved2;
- u64 ist[7];
- u32 reserved3;
- u32 reserved4;
- u16 reserved5;
- u16 io_map_base;
- u32 io_bitmap[IO_BITMAP_SIZE];
-} __attribute__((packed)) ____cacheline_aligned;
-
-struct thread_struct {
- unsigned long guestos_sp;
- unsigned long guestos_ss;
- unsigned long rip;
- unsigned long rsp;
- unsigned long userrsp; /* Copy from PDA */
- unsigned long fs;
- unsigned long gs;
- unsigned short es, ds, fsindex, gsindex;
- enum {
- THREAD_IA32 = 0x0001,
- } flags;
-/* Hardware debugging registers */
- unsigned long debugreg[8]; /* %%db0-7 debug registers */
-/* floating point info */
- union i387_union i387;
-/* Trap info. */
- trap_info_t traps[256];
-};
-
-#define IDT_ENTRIES 256
-extern struct gate_struct idt_table[];
-extern struct gate_struct *idt_tables[];
-
-#define INIT_THREAD { \
- 0, 0, \
- 0, 0, 0, 0, \
- 0, 0, 0, 0, \
- 0, /* flags */ \
- { [0 ... 7] = 0 }, /* debugging registers */ \
- { { 0, }, }, /* 387 state */ \
- { {0} } /* io permissions */ \
-}
-
-#define INIT_TSS { \
- 0,0, /* back_link, __blh */ \
- 0, /* rsp0 */ \
- 0, 0, /* rsp1, rsp2 */ \
- 0, /* reserved */ \
- { [0 ... 6] = 0 }, /* ist[] */ \
- 0,0, /* reserved */ \
- 0, INVALID_IO_BITMAP_OFFSET, /* trace, bitmap */ \
- {~0, } /* ioperm */ \
-}
-
-struct mm_struct {
- /*
- * Every domain has a L1 pagetable of its own. Per-domain mappings
- * are put in this table (eg. the current GDT is mapped here).
- */
- l1_pgentry_t *perdomain_pt;
- pagetable_t pagetable;
- /* Current LDT details. */
- unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
- /* Next entry is passed to LGDT on domain switch. */
- char gdt[10];
-};
-
-#define IDLE0_MM \
-{ \
- perdomain_pt: 0, \
- pagetable: mk_pagetable(__pa(idle_pg_table)) \
-}
-
-/* Convenient accessor for mm.gdt. */
-#define SET_GDT_ENTRIES(_p, _e) ((*(u16 *)((_p)->mm.gdt + 0)) = (_e))
-#define SET_GDT_ADDRESS(_p, _a) ((*(u64 *)((_p)->mm.gdt + 2)) = (_a))
-#define GET_GDT_ENTRIES(_p) ((*(u16 *)((_p)->mm.gdt + 0)))
-#define GET_GDT_ADDRESS(_p) ((*(u64 *)((_p)->mm.gdt + 2)))
-
-long set_gdt(struct task_struct *p,
- unsigned long *frames,
- unsigned int entries);
-
-long set_debugreg(struct task_struct *p, int reg, unsigned long value);
-
-struct microcode {
- unsigned int hdrver;
- unsigned int rev;
- unsigned int date;
- unsigned int sig;
- unsigned int cksum;
- unsigned int ldrver;
- unsigned int pf;
- unsigned int reserved[5];
- unsigned int bits[500];
-};
-
-/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
-#define MICROCODE_IOCFREE _IO('6',0)
-
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
- __asm__ __volatile__("rep;nop");
-}
-
-#define cpu_relax() rep_nop()
-
-#define init_task (init_task_union.task)
-#define init_stack (init_task_union.stack)
-
-/* Avoid speculative execution by the CPU */
-extern inline void sync_core(void)
-{
- int tmp;
- asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
-}
-
-#define cpu_has_fpu 1
-
-#define ARCH_HAS_PREFETCH
-#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
-
-#define prefetch(x) __builtin_prefetch((x),0)
-#define prefetchw(x) __builtin_prefetch((x),1)
-#define spin_lock_prefetch(x) prefetchw(x)
-
-
-#endif /* __ASM_X86_64_PROCESSOR_H */
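
For reference, the cpuid() helper above follows the usual convention for leaf 0:
EAX returns the maximum supported leaf and EBX/EDX/ECX carry the 12-byte vendor
string. A minimal sketch of how it might be driven (the function name, buffer
and printk call are illustrative only, not taken from this changeset):

    /* Illustrative only: read the vendor string via the cpuid() helper.
     * Leaf 0 returns the string in EBX, EDX, ECX order. */
    static void print_vendor_id(void)
    {
        int max_leaf, vendor[3];
        char buf[13];
        cpuid(0, &max_leaf, &vendor[0], &vendor[2], &vendor[1]);
        memcpy(buf, vendor, 12);
        buf[12] = '\0';
        printk("CPU vendor: %s\n", buf);
    }
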
({ long __val_gu; \
int __ret_gu=1; \
switch(sizeof (*(ptr))) { \
-+ case 1: __ret_gu=copy_from_user(&__val_gu,ptr,1);break; \
-+ case 2: __ret_gu=copy_from_user(&__val_gu,ptr,2);break; \
-+ case 4: __ret_gu=copy_from_user(&__val_gu,ptr,4);break; \
-+ case 8: __ret_gu=copy_from_user(&__val_gu,ptr,8);break; \
-+ default: __ret_gu=copy_from_user(&__val_gu,ptr,sizeof(*(ptr)));break;\
- /*case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break;*/ \
- /*case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break;*/ \
- /*case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break;*/ \
- /*case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break;*/ \
- /*default: __get_user_bad(); break;*/ \
+ case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
+ case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
+ case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
+ case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \
+ default: __get_user_bad(); break; \
} \
(x) = (__typeof__(*(ptr)))__val_gu; \
__ret_gu; \
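
The restored __get_user_x cases keep get_user()'s usual contract: the macro
evaluates to zero on success and nonzero if the user-space access faults, and
the fetched value is written through its first argument. A minimal sketch of a
caller (the function and variable names here are made up for illustration):

    /* Illustrative only: fetch one long from a guest-supplied pointer,
     * failing cleanly if the address is bad. */
    static long fetch_guest_long(unsigned long *uptr, unsigned long *out)
    {
        unsigned long val;
        if ( get_user(val, uptr) )
            return -EFAULT;      /* access faulted or pointer was invalid */
        *out = val;
        return 0;
    }
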
* contiguous (or near contiguous) physical memory.
*/
#undef machine_to_phys_mapping
+#ifdef __x86_64__
+extern unsigned long *machine_to_phys_mapping;
+#else
#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
+#endif
/* Part of the domain API. */
int do_mmu_update(mmu_update_t *updates, int count, int *success_count);
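
On both builds the table serves the same purpose: it is indexed by machine
frame number and records the pseudo-physical frame number the domain expects
for that page; x86_64 reaches it through the extern pointer declared above,
while x86_32 keeps the fixed RDWR_MPT_VIRT_START mapping. A minimal sketch of
the lookup (the helper name is illustrative, not taken from this changeset):

    /* Illustrative only: machine frame number -> pseudo-physical frame number. */
    static inline unsigned long mfn_to_pseudophys(unsigned long mfn)
    {
        return machine_to_phys_mapping[mfn];
    }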